-/* block-async.c\r
- * \r
- * Asynchronous block wrappers for parallax.\r
- */\r
- \r
- \r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
-#include <pthread.h>\r
-#include "block-async.h"\r
-#include "blockstore.h"\r
-#include "vdi.h"\r
-\r
-\r
-#if 0\r
-#define DPRINTF(_f, _a...) printf ( _f , ## _a )\r
-#else\r
-#define DPRINTF(_f, _a...) ((void)0)\r
-#endif\r
-\r
-/* We have a queue of outstanding I/O requests implemented as a \r
- * circular producer-consumer ring with free-running buffers.\r
- * to allow reordering, this ring indirects to indexes in an \r
- * ring of io_structs.\r
- * \r
- * the block_* calls may either add an entry to this ring and return, \r
- * or satisfy the request immediately and call the callback directly.\r
- * None of the io calls in parallax should be nested enough to worry \r
- * about stack problems with this approach.\r
- */\r
-\r
-struct read_args {\r
+/* block-async.c
+ *
+ * Asynchronous block wrappers for parallax.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include "block-async.h"
+#include "blockstore.h"
+#include "vdi.h"
+
+
+#if 0
+#define DPRINTF(_f, _a...) printf ( _f , ## _a )
+#else
+#define DPRINTF(_f, _a...) ((void)0)
+#endif
+
+/* We have a queue of outstanding I/O requests implemented as a
+ * circular producer-consumer ring with free-running buffers.
+ * to allow reordering, this ring indirects to indexes in a
+ * ring of io_structs.
+ *
+ * the block_* calls may either add an entry to this ring and return,
+ * or satisfy the request immediately and call the callback directly.
+ * None of the io calls in parallax should be nested enough to worry
+ * about stack problems with this approach.
+ */
+
+struct read_args {
u64 addr;
-};\r
-\r
-struct write_args {\r
+};
+
+struct write_args {
u64 addr;
char *block;
-};\r
-\r
-struct alloc_args {\r
+};
+
+struct alloc_args {
char *block;
-};\r
- \r
-struct pending_io_req {\r
+};
+
+struct pending_io_req {
enum {IO_READ, IO_WRITE, IO_ALLOC, IO_RWAKE, IO_WWAKE} op;
union {
struct read_args r;
} u;
io_cb_t cb;
void *param;
-};\r
-\r
-void radix_lock_init(struct radix_lock *r)\r
-{\r
+};
+
+void radix_lock_init(struct radix_lock *r)
+{
int i;
pthread_mutex_init(&r->lock, NULL);
r->waiters[i] = NULL;
r->state[i] = ANY;
}
-}\r
-\r
-/* maximum outstanding I/O requests issued asynchronously */\r
-/* must be a power of 2.*/\r
+}
+
+/* maximum outstanding I/O requests issued asynchronously */
+/* must be a power of 2.*/
#define MAX_PENDING_IO 1024
-\r
-/* how many threads to concurrently issue I/O to the disk. */\r
+
+/* how many threads to concurrently issue I/O to the disk. */
#define IO_POOL_SIZE 10
-\r
-static struct pending_io_req pending_io_reqs[MAX_PENDING_IO];\r
-static int pending_io_list[MAX_PENDING_IO];\r
-static unsigned long io_prod = 0, io_cons = 0, io_free = 0;\r
-#define PENDING_IO_MASK(_x) ((_x) & (MAX_PENDING_IO - 1))\r
-#define PENDING_IO_IDX(_x) ((_x) - pending_io_reqs)\r
-#define PENDING_IO_ENT(_x) \\r
- (&pending_io_reqs[pending_io_list[PENDING_IO_MASK(_x)]])\r
-#define CAN_PRODUCE_PENDING_IO ((io_free + MAX_PENDING_IO) != io_prod)\r
-#define CAN_CONSUME_PENDING_IO (io_cons != io_prod)\r
-static pthread_mutex_t pending_io_lock = PTHREAD_MUTEX_INITIALIZER;\r
-static pthread_cond_t pending_io_cond = PTHREAD_COND_INITIALIZER;\r
-\r
-static void init_pending_io(void)\r
-{\r
+
+static struct pending_io_req pending_io_reqs[MAX_PENDING_IO];
+static int pending_io_list[MAX_PENDING_IO];
+static unsigned long io_prod = 0, io_cons = 0, io_free = 0;
+#define PENDING_IO_MASK(_x) ((_x) & (MAX_PENDING_IO - 1))
+#define PENDING_IO_IDX(_x) ((_x) - pending_io_reqs)
+#define PENDING_IO_ENT(_x) \
+ (&pending_io_reqs[pending_io_list[PENDING_IO_MASK(_x)]])
+#define CAN_PRODUCE_PENDING_IO ((io_free + MAX_PENDING_IO) != io_prod)
+#define CAN_CONSUME_PENDING_IO (io_cons != io_prod)
+static pthread_mutex_t pending_io_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t pending_io_cond = PTHREAD_COND_INITIALIZER;
+
+static void init_pending_io(void)
+{
int i;
- \r
+
for (i=0; i<MAX_PENDING_IO; i++)
pending_io_list[i] = i;
- \r
-} \r
-\r
-void block_read(u64 addr, io_cb_t cb, void *param)\r
-{\r
+
+}
+
+void block_read(u64 addr, io_cb_t cb, void *param)
+{
struct pending_io_req *req;
pthread_mutex_lock(&pending_io_lock);
req->cb = cb;
req->param = param;
- pthread_cond_signal(&pending_io_cond);\r
+ pthread_cond_signal(&pending_io_cond);
pthread_mutex_unlock(&pending_io_lock);
-}\r
-\r
-\r
-void block_write(u64 addr, char *block, io_cb_t cb, void *param)\r
-{\r
+}
+
+
+void block_write(u64 addr, char *block, io_cb_t cb, void *param)
+{
struct pending_io_req *req;
pthread_mutex_lock(&pending_io_lock);
req->cb = cb;
req->param = param;
- pthread_cond_signal(&pending_io_cond);\r
+ pthread_cond_signal(&pending_io_cond);
pthread_mutex_unlock(&pending_io_lock);
-}\r
-\r
-\r
-void block_alloc(char *block, io_cb_t cb, void *param)\r
-{\r
+}
+
+
+void block_alloc(char *block, io_cb_t cb, void *param)
+{
struct pending_io_req *req;
- \r
+
pthread_mutex_lock(&pending_io_lock);
assert(CAN_PRODUCE_PENDING_IO);
req->cb = cb;
req->param = param;
- pthread_cond_signal(&pending_io_cond);\r
+ pthread_cond_signal(&pending_io_cond);
pthread_mutex_unlock(&pending_io_lock);
-}\r
-\r
-void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param)\r
-{\r
+}
+
+void block_rlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
+{
struct io_ret ret;
pthread_mutex_lock(&r->lock);
pthread_mutex_unlock(&r->lock);
return;
}
-}\r
-\r
-\r
-void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param)\r
-{\r
+}
+
+
+void block_wlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
+{
struct io_ret ret;
pthread_mutex_lock(&r->lock);
pthread_mutex_unlock(&r->lock);
return;
}
- \r
-}\r
-\r
-/* called with radix_lock locked and lock count of zero. */\r
-static void wake_waiters(struct radix_lock *r, int row)\r
-{\r
+
+}
+
+/* called with radix_lock locked and lock count of zero. */
+static void wake_waiters(struct radix_lock *r, int row)
+{
struct pending_io_req *req;
struct radix_wait *rw;
}
pthread_mutex_lock(&pending_io_lock);
- pthread_cond_signal(&pending_io_cond);\r
+ pthread_cond_signal(&pending_io_cond);
pthread_mutex_unlock(&pending_io_lock);
-}\r
-\r
-void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param)\r
-{\r
+}
+
+void block_runlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
+{
struct io_ret ret;
- \r
+
pthread_mutex_lock(&r->lock);
assert(r->lines[row] > 0); /* try to catch misuse. */
r->lines[row]--;
}
pthread_mutex_unlock(&r->lock);
cb(ret, param);
-}\r
-\r
-void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param)\r
-{\r
+}
+
+void block_wunlock(struct radix_lock *r, int row, io_cb_t cb, void *param)
+{
struct io_ret ret;
pthread_mutex_lock(&r->lock);
wake_waiters(r, row);
pthread_mutex_unlock(&r->lock);
cb(ret, param);
-}\r
-\r
-/* consumer calls */\r
-static void do_next_io_req(struct pending_io_req *req)\r
-{\r
+}
+
+/* consumer calls */
+static void do_next_io_req(struct pending_io_req *req)
+{
struct io_ret ret;
void *param;
pthread_mutex_lock(&pending_io_lock);
pending_io_list[PENDING_IO_MASK(io_free++)] = PENDING_IO_IDX(req);
pthread_mutex_unlock(&pending_io_lock);
- \r
+
assert(req->cb != NULL);
req->cb(ret, param);
-}\r
-\r
-void *io_thread(void *param) \r
-{\r
+}
+
+void *io_thread(void *param)
+{
int tid;
struct pending_io_req *req;
/* Set this thread's tid. */
- tid = *(int *)param;\r
- free(param);\r
- \r
-start:\r
- pthread_mutex_lock(&pending_io_lock);\r
- while (io_prod == io_cons) {\r
- pthread_cond_wait(&pending_io_cond, &pending_io_lock);\r
- }\r
- \r
- if (io_prod == io_cons) {\r
- /* unnecessary wakeup. */\r
- pthread_mutex_unlock(&pending_io_lock);\r
- goto start;\r
- }\r
- \r
+ tid = *(int *)param;
+ free(param);
+
+start:
+ pthread_mutex_lock(&pending_io_lock);
+ while (io_prod == io_cons) {
+ pthread_cond_wait(&pending_io_cond, &pending_io_lock);
+ }
+
+ if (io_prod == io_cons) {
+ /* unnecessary wakeup. */
+ pthread_mutex_unlock(&pending_io_lock);
+ goto start;
+ }
+
req = PENDING_IO_ENT(io_cons++);
pthread_mutex_unlock(&pending_io_lock);
- \r
- do_next_io_req(req);\r
- \r
+
+ do_next_io_req(req);
+
goto start;
- \r
-}\r
-\r
-static pthread_t io_pool[IO_POOL_SIZE];\r
-void start_io_threads(void)\r
-\r
-{ \r
+
+}
+
+static pthread_t io_pool[IO_POOL_SIZE];
+void start_io_threads(void)
+
+{
int i, tid=0;
for (i=0; i < IO_POOL_SIZE; i++) {
- int ret, *t;\r
- t = (int *)malloc(sizeof(int));\r
- *t = tid++;\r
- ret = pthread_create(&io_pool[i], NULL, io_thread, t);\r
- if (ret != 0) printf("Error starting thread %d\n", i);\r
- }\r
- \r
-}\r
-\r
-void init_block_async(void)\r
-{\r
+ int ret, *t;
+ t = (int *)malloc(sizeof(int));
+ *t = tid++;
+ ret = pthread_create(&io_pool[i], NULL, io_thread, t);
+ if (ret != 0) printf("Error starting thread %d\n", i);
+ }
+
+}
+
+void init_block_async(void)
+{
init_pending_io();
start_io_threads();
-}\r
+}
/* requests-async.c
- *\r
+ *
* asynchronous request dispatcher for radix access in parallax.
- */\r
-\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
#include <ctype.h>
-#include <assert.h>\r
-#include <pthread.h>\r
+#include <assert.h>
+#include <pthread.h>
#include <err.h>
#include <zlib.h> /* for crc32() */
-#include "requests-async.h"\r
-#include "vdi.h"\r
-#include "radix.h"\r
-\r
-#define L1_IDX(_a) (((_a) & 0x0000000007fc0000ULL) >> 18)\r
-#define L2_IDX(_a) (((_a) & 0x000000000003fe00ULL) >> 9)\r
-#define L3_IDX(_a) (((_a) & 0x00000000000001ffULL))\r
-\r
-\r
-#if 0\r
-#define DPRINTF(_f, _a...) printf ( _f , ## _a )\r
-#else\r
-#define DPRINTF(_f, _a...) ((void)0)\r
-#endif\r
-\r
+#include "requests-async.h"
+#include "vdi.h"
+#include "radix.h"
+
+#define L1_IDX(_a) (((_a) & 0x0000000007fc0000ULL) >> 18)
+#define L2_IDX(_a) (((_a) & 0x000000000003fe00ULL) >> 9)
+#define L3_IDX(_a) (((_a) & 0x00000000000001ffULL))
+
+
+#if 0
+#define DPRINTF(_f, _a...) printf ( _f , ## _a )
+#else
+#define DPRINTF(_f, _a...) ((void)0)
+#endif
+
struct block_info {
u32 crc;
u32 unused;
};
-\r
-struct io_req {\r
- enum { IO_OP_READ, IO_OP_WRITE } op;\r
- u64 root;\r
- u64 vaddr;\r
- int state;\r
- io_cb_t cb;\r
- void *param;\r
- struct radix_lock *lock;\r
-\r
- /* internal stuff: */\r
+
+struct io_req {
+ enum { IO_OP_READ, IO_OP_WRITE } op;
+ u64 root;
+ u64 vaddr;
+ int state;
+ io_cb_t cb;
+ void *param;
+ struct radix_lock *lock;
+
+ /* internal stuff: */
struct io_ret retval;/* holds the return while we unlock. */
char *block; /* the block to write */
radix_tree_node radix[3];
u64 radix_addr[3];
struct block_info bi;
-};\r
-\r
-void clear_w_bits(radix_tree_node node) \r
-{\r
+};
+
+void clear_w_bits(radix_tree_node node)
+{
int i;
for (i=0; i<RADIX_TREE_MAP_ENTRIES; i++)
node[i] = node[i] & ONEMASK;
for (i=0; i<RADIX_TREE_MAP_ENTRIES; i+=2)
node[i] = node[i] & ONEMASK;
return;
-}\r
-\r
-enum states {\r
- /* both */\r
- READ_L1,\r
- READ_L2,\r
- READ_L3,\r
-\r
- /* read */\r
- READ_LOCKED,\r
- READ_DATA,\r
- READ_UNLOCKED,\r
- RETURN_ZERO,\r
-\r
- /* write */\r
- WRITE_LOCKED,\r
- WRITE_DATA,\r
+}
+
+enum states {
+ /* both */
+ READ_L1,
+ READ_L2,
+ READ_L3,
+
+ /* read */
+ READ_LOCKED,
+ READ_DATA,
+ READ_UNLOCKED,
+ RETURN_ZERO,
+
+ /* write */
+ WRITE_LOCKED,
+ WRITE_DATA,
WRITE_L3,
- WRITE_UNLOCKED,\r
- \r
- /* L3 Zero Path */\r
- ALLOC_DATA_L3z,\r
- WRITE_L3_L3z,\r
- \r
- /* L3 Fault Path */\r
- ALLOC_DATA_L3f,\r
- WRITE_L3_L3f,\r
- \r
- /* L2 Zero Path */\r
- ALLOC_DATA_L2z,\r
- WRITE_L2_L2z,\r
- ALLOC_L3_L2z,\r
- WRITE_L2_L3z,\r
- \r
- /* L2 Fault Path */\r
- READ_L3_L2f,\r
- ALLOC_DATA_L2f,\r
- WRITE_L2_L2f,\r
- ALLOC_L3_L2f,\r
- WRITE_L2_L3f,\r
-\r
+ WRITE_UNLOCKED,
+
+ /* L3 Zero Path */
+ ALLOC_DATA_L3z,
+ WRITE_L3_L3z,
+
+ /* L3 Fault Path */
+ ALLOC_DATA_L3f,
+ WRITE_L3_L3f,
+
+ /* L2 Zero Path */
+ ALLOC_DATA_L2z,
+ WRITE_L2_L2z,
+ ALLOC_L3_L2z,
+ WRITE_L2_L3z,
+
+ /* L2 Fault Path */
+ READ_L3_L2f,
+ ALLOC_DATA_L2f,
+ WRITE_L2_L2f,
+ ALLOC_L3_L2f,
+ WRITE_L2_L3f,
+
/* L1 Zero Path */
- ALLOC_DATA_L1z,\r
- ALLOC_L3_L1z,\r
- ALLOC_L2_L1z,\r
- WRITE_L1_L1z,\r
-\r
+ ALLOC_DATA_L1z,
+ ALLOC_L3_L1z,
+ ALLOC_L2_L1z,
+ WRITE_L1_L1z,
+
/* L1 Fault Path */
READ_L2_L1f,
READ_L3_L1f,
- ALLOC_DATA_L1f,\r
- ALLOC_L3_L1f,\r
- ALLOC_L2_L1f,\r
- WRITE_L1_L1f,\r
- \r
-};\r
-\r
-enum radix_offsets {\r
- L1 = 0, \r
- L2 = 1,\r
- L3 = 2\r
-};\r
-\r
-\r
-static void read_cb(struct io_ret ret, void *param);\r
-static void write_cb(struct io_ret ret, void *param);\r
-\r
+ ALLOC_DATA_L1f,
+ ALLOC_L3_L1f,
+ ALLOC_L2_L1f,
+ WRITE_L1_L1f,
+
+};
+
+enum radix_offsets {
+ L1 = 0,
+ L2 = 1,
+ L3 = 2
+};
+
+
+static void read_cb(struct io_ret ret, void *param);
+static void write_cb(struct io_ret ret, void *param);
+
int vdi_read(vdi_t *vdi, u64 vaddr, io_cb_t cb, void *param)
-{\r
- struct io_req *req;\r
-\r
+{
+ struct io_req *req;
+
if (!VALID_VADDR(vaddr)) return ERR_BAD_VADDR;
/* Every second line in the bottom-level radix tree is used to */
/* store crc32 values etc. We shift the vadder here to achied this. */
vaddr <<= 1;
-\r
- req = (struct io_req *)malloc(sizeof (struct io_req));\r
+
+ req = (struct io_req *)malloc(sizeof (struct io_req));
if (req == NULL) return ERR_NOMEM;
-\r
+
req->radix[0] = req->radix[1] = req->radix[2] = NULL;
- req->op = IO_OP_READ;\r
- req->root = vdi->radix_root;\r
- req->lock = vdi->radix_lock; \r
- req->vaddr = vaddr;\r
- req->cb = cb;\r
- req->param = param;\r
- req->state = READ_LOCKED;\r
-\r
+ req->op = IO_OP_READ;
+ req->root = vdi->radix_root;
+ req->lock = vdi->radix_lock;
+ req->vaddr = vaddr;
+ req->cb = cb;
+ req->param = param;
+ req->state = READ_LOCKED;
+
block_rlock(req->lock, L1_IDX(vaddr), read_cb, req);
- \r
- return 0;\r
-}\r
-\r
-\r
+
+ return 0;
+}
+
+
int vdi_write(vdi_t *vdi, u64 vaddr, char *block,
io_cb_t cb, void *param)
-{\r
- struct io_req *req;\r
-\r
+{
+ struct io_req *req;
+
if (!VALID_VADDR(vaddr)) return ERR_BAD_VADDR;
/* Every second line in the bottom-level radix tree is used to */
/* store crc32 values etc. We shift the vadder here to achied this. */
vaddr <<= 1;
-\r
- req = (struct io_req *)malloc(sizeof (struct io_req));\r
+
+ req = (struct io_req *)malloc(sizeof (struct io_req));
if (req == NULL) return ERR_NOMEM;
req->radix[0] = req->radix[1] = req->radix[2] = NULL;
req->bi.crc = (u32) crc32(0L, Z_NULL, 0);
req->bi.crc = (u32) crc32(req->bi.crc, block, BLOCK_SIZE);
req->bi.unused = 0xdeadbeef;
-\r
+
req->cb = cb;
req->param = param;
- req->radix_addr[L1] = getid(req->root); /* for consistency */\r
+ req->radix_addr[L1] = getid(req->root); /* for consistency */
req->state = WRITE_LOCKED;
-\r
+
block_wlock(req->lock, L1_IDX(vaddr), write_cb, req);
-\r
-\r
+
+
return 0;
-}\r
-\r
+}
+
static void read_cb(struct io_ret ret, void *param)
-{\r
- struct io_req *req = (struct io_req *)param;\r
- radix_tree_node node;\r
- u64 idx;\r
- char *block;\r
- void *req_param;\r
-\r
- DPRINTF("read_cb\n");\r
- /* get record */\r
- switch(req->state) {\r
- \r
- case READ_LOCKED: \r
- \r
- DPRINTF("READ_LOCKED\n");\r
- req->state = READ_L1;\r
- block_read(getid(req->root), read_cb, req); \r
- break;\r
- \r
- case READ_L1: /* block is the radix root */\r
-\r
- DPRINTF("READ_L1\n");\r
- block = IO_BLOCK(ret);\r
- if (block == NULL) goto fail;\r
- node = (radix_tree_node) block;\r
- idx = getid( node[L1_IDX(req->vaddr)] );\r
- free(block);\r
- if ( idx == ZERO ) {\r
+{
+ struct io_req *req = (struct io_req *)param;
+ radix_tree_node node;
+ u64 idx;
+ char *block;
+ void *req_param;
+
+ DPRINTF("read_cb\n");
+ /* get record */
+ switch(req->state) {
+
+ case READ_LOCKED:
+
+ DPRINTF("READ_LOCKED\n");
+ req->state = READ_L1;
+ block_read(getid(req->root), read_cb, req);
+ break;
+
+ case READ_L1: /* block is the radix root */
+
+ DPRINTF("READ_L1\n");
+ block = IO_BLOCK(ret);
+ if (block == NULL) goto fail;
+ node = (radix_tree_node) block;
+ idx = getid( node[L1_IDX(req->vaddr)] );
+ free(block);
+ if ( idx == ZERO ) {
req->state = RETURN_ZERO;
block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
- } else {\r
+ } else {
req->state = READ_L2;
block_read(idx, read_cb, req);
- }\r
- break;\r
-\r
- case READ_L2:\r
-\r
- DPRINTF("READ_L2\n");\r
- block = IO_BLOCK(ret);\r
- if (block == NULL) goto fail;\r
- node = (radix_tree_node) block;\r
- idx = getid( node[L2_IDX(req->vaddr)] );\r
- free(block);\r
- if ( idx == ZERO ) {\r
+ }
+ break;
+
+ case READ_L2:
+
+ DPRINTF("READ_L2\n");
+ block = IO_BLOCK(ret);
+ if (block == NULL) goto fail;
+ node = (radix_tree_node) block;
+ idx = getid( node[L2_IDX(req->vaddr)] );
+ free(block);
+ if ( idx == ZERO ) {
req->state = RETURN_ZERO;
block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
- } else {\r
+ } else {
req->state = READ_L3;
block_read(idx, read_cb, req);
- }\r
- break;\r
-\r
- case READ_L3:\r
+ }
+ break;
+
+ case READ_L3:
{
struct block_info *bi;
- DPRINTF("READ_L3\n");\r
- block = IO_BLOCK(ret);\r
- if (block == NULL) goto fail;\r
- node = (radix_tree_node) block;\r
- idx = getid( node[L3_IDX(req->vaddr)] );\r
+ DPRINTF("READ_L3\n");
+ block = IO_BLOCK(ret);
+ if (block == NULL) goto fail;
+ node = (radix_tree_node) block;
+ idx = getid( node[L3_IDX(req->vaddr)] );
bi = (struct block_info *) &node[L3_IDX(req->vaddr) + 1];
req->bi = *bi;
- free(block);\r
- if ( idx == ZERO ) {\r
+ free(block);
+ if ( idx == ZERO ) {
req->state = RETURN_ZERO;
block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
- } else {\r
+ } else {
req->state = READ_DATA;
block_read(idx, read_cb, req);
- }\r
- break;\r
+ }
+ break;
}
- case READ_DATA:\r
+ case READ_DATA:
{
u32 crc;
- DPRINTF("READ_DATA\n");\r
+ DPRINTF("READ_DATA\n");
block = IO_BLOCK(ret);
if (block == NULL) goto fail;
/* goto fail; */
}
- req->retval = ret;\r
- req->state = READ_UNLOCKED;\r
- block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);\r
- break;\r
+ req->retval = ret;
+ req->state = READ_UNLOCKED;
+ block_runlock(req->lock, L1_IDX(req->vaddr), read_cb, req);
+ break;
}
- case READ_UNLOCKED:\r
+ case READ_UNLOCKED:
{
struct io_ret r;
io_cb_t cb;
- DPRINTF("READ_UNLOCKED\n");\r
- req_param = req->param;\r
- r = req->retval;\r
- cb = req->cb;\r
- free(req);\r
- cb(r, req_param);\r
- break;\r
- }\r
- \r
- case RETURN_ZERO:\r
+ DPRINTF("READ_UNLOCKED\n");
+ req_param = req->param;
+ r = req->retval;
+ cb = req->cb;
+ free(req);
+ cb(r, req_param);
+ break;
+ }
+
+ case RETURN_ZERO:
{
struct io_ret r;
io_cb_t cb;
DPRINTF("RETURN_ZERO\n");
req_param = req->param;
- cb = req->cb;\r
+ cb = req->cb;
free(req);
- r.type = IO_BLOCK_T;\r
- r.u.b = newblock();\r
+ r.type = IO_BLOCK_T;
+ r.u.b = newblock();
cb(r, req_param);
break;
}
- \r
- default:\r
- DPRINTF("*** Write: Bad state! (%d) ***\n", req->state);\r
- goto fail;\r
- }\r
- \r
- return;\r
-\r
- fail:\r
+
+ default:
+ DPRINTF("*** Write: Bad state! (%d) ***\n", req->state);
+ goto fail;
+ }
+
+ return;
+
+ fail:
{
struct io_ret r;
io_cb_t cb;
DPRINTF("asyn_read had a read error.\n");
- req_param = req->param;\r
- r = ret;\r
- cb = req->cb;\r
- free(req);\r
- cb(r, req_param);\r
+ req_param = req->param;
+ r = ret;
+ cb = req->cb;
+ free(req);
+ cb(r, req_param);
}
-\r
-\r
-}\r
-\r
+
+
+}
+
static void write_cb(struct io_ret r, void *param)
-{\r
- struct io_req *req = (struct io_req *)param;\r
- radix_tree_node node;\r
- u64 a, addr;\r
- void *req_param;\r
+{
+ struct io_req *req = (struct io_req *)param;
+ radix_tree_node node;
+ u64 a, addr;
+ void *req_param;
struct block_info *bi;
-\r
- switch(req->state) {\r
- \r
- case WRITE_LOCKED:\r
+
+ switch(req->state) {
+
+ case WRITE_LOCKED:
- DPRINTF("WRITE_LOCKED (%llu)\n", L1_IDX(req->vaddr));\r
- req->state = READ_L1;\r
- block_read(getid(req->root), write_cb, req); \r
- break;\r
- \r
- case READ_L1: /* block is the radix root */\r
-\r
- DPRINTF("READ_L1\n");\r
- node = (radix_tree_node) IO_BLOCK(r);\r
- if (node == NULL) goto fail;\r
- a = node[L1_IDX(req->vaddr)];\r
- addr = getid(a);\r
-\r
- req->radix_addr[L2] = addr;\r
- req->radix[L1] = node;\r
-\r
- if ( addr == ZERO ) {\r
+ DPRINTF("WRITE_LOCKED (%llu)\n", L1_IDX(req->vaddr));
+ req->state = READ_L1;
+ block_read(getid(req->root), write_cb, req);
+ break;
+
+ case READ_L1: /* block is the radix root */
+
+ DPRINTF("READ_L1\n");
+ node = (radix_tree_node) IO_BLOCK(r);
+ if (node == NULL) goto fail;
+ a = node[L1_IDX(req->vaddr)];
+ addr = getid(a);
+
+ req->radix_addr[L2] = addr;
+ req->radix[L1] = node;
+
+ if ( addr == ZERO ) {
/* L1 empty subtree: */
req->state = ALLOC_DATA_L1z;
block_alloc( req->block, write_cb, req );
- } else if ( !iswritable(a) ) {\r
- /* L1 fault: */\r
- req->state = READ_L2_L1f;\r
- block_read( addr, write_cb, req );\r
- } else {\r
- req->state = READ_L2;\r
- block_read( addr, write_cb, req );\r
- }\r
- break;\r
- \r
- case READ_L2:\r
-\r
- DPRINTF("READ_L2\n");\r
- node = (radix_tree_node) IO_BLOCK(r);\r
- if (node == NULL) goto fail;\r
- a = node[L2_IDX(req->vaddr)];\r
- addr = getid(a);\r
-\r
- req->radix_addr[L3] = addr;\r
- req->radix[L2] = node;\r
-\r
- if ( addr == ZERO ) {\r
+ } else if ( !iswritable(a) ) {
+ /* L1 fault: */
+ req->state = READ_L2_L1f;
+ block_read( addr, write_cb, req );
+ } else {
+ req->state = READ_L2;
+ block_read( addr, write_cb, req );
+ }
+ break;
+
+ case READ_L2:
+
+ DPRINTF("READ_L2\n");
+ node = (radix_tree_node) IO_BLOCK(r);
+ if (node == NULL) goto fail;
+ a = node[L2_IDX(req->vaddr)];
+ addr = getid(a);
+
+ req->radix_addr[L3] = addr;
+ req->radix[L2] = node;
+
+ if ( addr == ZERO ) {
/* L2 empty subtree: */
- req->state = ALLOC_DATA_L2z;\r
- block_alloc( req->block, write_cb, req );\r
- } else if ( !iswritable(a) ) {\r
- /* L2 fault: */\r
- req->state = READ_L3_L2f;\r
- block_read( addr, write_cb, req );\r
- } else {\r
- req->state = READ_L3;\r
- block_read( addr, write_cb, req );\r
- }\r
- break;\r
- \r
- case READ_L3:\r
-\r
- DPRINTF("READ_L3\n");\r
- node = (radix_tree_node) IO_BLOCK(r);\r
- if (node == NULL) goto fail;\r
- a = node[L3_IDX(req->vaddr)];\r
- addr = getid(a);\r
-\r
- req->radix[L3] = node;\r
-\r
- if ( addr == ZERO ) {\r
- /* L3 fault: */\r
- req->state = ALLOC_DATA_L3z;\r
- block_alloc( req->block, write_cb, req );\r
- } else if ( !iswritable(a) ) {\r
- /* L3 fault: */\r
- req->state = ALLOC_DATA_L3f;\r
- block_alloc( req->block, write_cb, req );\r
- } else {\r
- req->state = WRITE_DATA;\r
- block_write( addr, req->block, write_cb, req );\r
- }\r
- break;\r
- \r
+ req->state = ALLOC_DATA_L2z;
+ block_alloc( req->block, write_cb, req );
+ } else if ( !iswritable(a) ) {
+ /* L2 fault: */
+ req->state = READ_L3_L2f;
+ block_read( addr, write_cb, req );
+ } else {
+ req->state = READ_L3;
+ block_read( addr, write_cb, req );
+ }
+ break;
+
+ case READ_L3:
+
+ DPRINTF("READ_L3\n");
+ node = (radix_tree_node) IO_BLOCK(r);
+ if (node == NULL) goto fail;
+ a = node[L3_IDX(req->vaddr)];
+ addr = getid(a);
+
+ req->radix[L3] = node;
+
+ if ( addr == ZERO ) {
+ /* L3 fault: */
+ req->state = ALLOC_DATA_L3z;
+ block_alloc( req->block, write_cb, req );
+ } else if ( !iswritable(a) ) {
+ /* L3 fault: */
+ req->state = ALLOC_DATA_L3f;
+ block_alloc( req->block, write_cb, req );
+ } else {
+ req->state = WRITE_DATA;
+ block_write( addr, req->block, write_cb, req );
+ }
+ break;
+
case WRITE_DATA:
DPRINTF("WRITE_DATA\n");
block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req);
break;
- /* L3 Zero Path: */\r
-\r
- case ALLOC_DATA_L3z:\r
-\r
- DPRINTF("ALLOC_DATA_L3z\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L3][L3_IDX(req->vaddr)] = a;\r
+ /* L3 Zero Path: */
+
+ case ALLOC_DATA_L3z:
+
+ DPRINTF("ALLOC_DATA_L3z\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L3][L3_IDX(req->vaddr)] = a;
bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1];
req->bi.unused = 102;
*bi = req->bi;
- req->state = WRITE_L3_L3z;\r
- block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req);\r
- break;\r
- \r
- /* L3 Fault Path: */\r
-\r
- case ALLOC_DATA_L3f:\r
+ req->state = WRITE_L3_L3z;
+ block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req);
+ break;
+
+ /* L3 Fault Path: */
+
+ case ALLOC_DATA_L3f:
- DPRINTF("ALLOC_DATA_L3f\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L3][L3_IDX(req->vaddr)] = a;\r
+ DPRINTF("ALLOC_DATA_L3f\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L3][L3_IDX(req->vaddr)] = a;
bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1];
req->bi.unused = 103;
*bi = req->bi;
- req->state = WRITE_L3_L3f;\r
- block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req);\r
- break;\r
-\r
- /* L2 Zero Path: */\r
- \r
- case ALLOC_DATA_L2z:\r
-\r
- DPRINTF("ALLOC_DATA_L2z\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L3] = newblock();\r
- req->radix[L3][L3_IDX(req->vaddr)] = a;\r
+ req->state = WRITE_L3_L3f;
+ block_write(req->radix_addr[L3], (char*)req->radix[L3], write_cb, req);
+ break;
+
+ /* L2 Zero Path: */
+
+ case ALLOC_DATA_L2z:
+
+ DPRINTF("ALLOC_DATA_L2z\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L3] = newblock();
+ req->radix[L3][L3_IDX(req->vaddr)] = a;
bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1];
req->bi.unused = 104;
*bi = req->bi;
- req->state = ALLOC_L3_L2z;\r
- block_alloc( (char*)req->radix[L3], write_cb, req );\r
- break;\r
-\r
- case ALLOC_L3_L2z:\r
-\r
- DPRINTF("ALLOC_L3_L2z\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L2][L2_IDX(req->vaddr)] = a;\r
- req->state = WRITE_L2_L2z;\r
- block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req);\r
- break;\r
- \r
- /* L2 Fault Path: */\r
- \r
- case READ_L3_L2f:\r
- \r
- DPRINTF("READ_L3_L2f\n");\r
- node = (radix_tree_node) IO_BLOCK(r);\r
+ req->state = ALLOC_L3_L2z;
+ block_alloc( (char*)req->radix[L3], write_cb, req );
+ break;
+
+ case ALLOC_L3_L2z:
+
+ DPRINTF("ALLOC_L3_L2z\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L2][L2_IDX(req->vaddr)] = a;
+ req->state = WRITE_L2_L2z;
+ block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req);
+ break;
+
+ /* L2 Fault Path: */
+
+ case READ_L3_L2f:
+
+ DPRINTF("READ_L3_L2f\n");
+ node = (radix_tree_node) IO_BLOCK(r);
clear_L3_w_bits(node);
- if (node == NULL) goto fail;\r
- a = node[L2_IDX(req->vaddr)];\r
- addr = getid(a);\r
-\r
- req->radix[L3] = node;\r
+ if (node == NULL) goto fail;
+ a = node[L2_IDX(req->vaddr)];
+ addr = getid(a);
+
+ req->radix[L3] = node;
req->state = ALLOC_DATA_L2f;
- block_alloc( req->block, write_cb, req );\r
- break;\r
- \r
- case ALLOC_DATA_L2f:\r
-\r
- DPRINTF("ALLOC_DATA_L2f\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L3][L3_IDX(req->vaddr)] = a;\r
+ block_alloc( req->block, write_cb, req );
+ break;
+
+ case ALLOC_DATA_L2f:
+
+ DPRINTF("ALLOC_DATA_L2f\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L3][L3_IDX(req->vaddr)] = a;
bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1];
req->bi.unused = 105;
*bi = req->bi;
- req->state = ALLOC_L3_L2f;\r
- block_alloc( (char*)req->radix[L3], write_cb, req );\r
- break;\r
-\r
- case ALLOC_L3_L2f:\r
-\r
- DPRINTF("ALLOC_L3_L2f\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L2][L2_IDX(req->vaddr)] = a;\r
- req->state = WRITE_L2_L2f;\r
- block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req);\r
- break;\r
- \r
- /* L1 Zero Path: */\r
- \r
- case ALLOC_DATA_L1z:\r
-\r
- DPRINTF("ALLOC_DATA_L1z\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L3] = newblock();\r
- req->radix[L3][L3_IDX(req->vaddr)] = a;\r
+ req->state = ALLOC_L3_L2f;
+ block_alloc( (char*)req->radix[L3], write_cb, req );
+ break;
+
+ case ALLOC_L3_L2f:
+
+ DPRINTF("ALLOC_L3_L2f\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L2][L2_IDX(req->vaddr)] = a;
+ req->state = WRITE_L2_L2f;
+ block_write(req->radix_addr[L2], (char*)req->radix[L2], write_cb, req);
+ break;
+
+ /* L1 Zero Path: */
+
+ case ALLOC_DATA_L1z:
+
+ DPRINTF("ALLOC_DATA_L1z\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L3] = newblock();
+ req->radix[L3][L3_IDX(req->vaddr)] = a;
bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1];
req->bi.unused = 106;
*bi = req->bi;
- req->state = ALLOC_L3_L1z;\r
- block_alloc( (char*)req->radix[L3], write_cb, req );\r
- break;\r
+ req->state = ALLOC_L3_L1z;
+ block_alloc( (char*)req->radix[L3], write_cb, req );
+ break;
+
+ case ALLOC_L3_L1z:
+
+ DPRINTF("ALLOC_L3_L1z\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L2] = newblock();
+ req->radix[L2][L2_IDX(req->vaddr)] = a;
+ req->state = ALLOC_L2_L1z;
+ block_alloc( (char*)req->radix[L2], write_cb, req );
+ break;
+
+ case ALLOC_L2_L1z:
+
+ DPRINTF("ALLOC_L2_L1z\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L1][L1_IDX(req->vaddr)] = a;
+ req->state = WRITE_L1_L1z;
+ block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req);
+ break;
+
+ /* L1 Fault Path: */
+
+ case READ_L2_L1f:
+
+ DPRINTF("READ_L2_L1f\n");
+ node = (radix_tree_node) IO_BLOCK(r);
+ clear_w_bits(node);
+ if (node == NULL) goto fail;
+ a = node[L2_IDX(req->vaddr)];
+ addr = getid(a);
+
+ req->radix_addr[L3] = addr;
+ req->radix[L2] = node;
- case ALLOC_L3_L1z:\r
-\r
- DPRINTF("ALLOC_L3_L1z\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L2] = newblock();\r
- req->radix[L2][L2_IDX(req->vaddr)] = a;\r
- req->state = ALLOC_L2_L1z;\r
- block_alloc( (char*)req->radix[L2], write_cb, req );\r
- break;\r
-\r
- case ALLOC_L2_L1z:\r
-\r
- DPRINTF("ALLOC_L2_L1z\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L1][L1_IDX(req->vaddr)] = a;\r
- req->state = WRITE_L1_L1z;\r
- block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req);\r
- break;\r
-\r
- /* L1 Fault Path: */\r
- \r
- case READ_L2_L1f:\r
- \r
- DPRINTF("READ_L2_L1f\n");\r
- node = (radix_tree_node) IO_BLOCK(r);\r
- clear_w_bits(node);\r
- if (node == NULL) goto fail;\r
- a = node[L2_IDX(req->vaddr)];\r
- addr = getid(a);\r
-\r
- req->radix_addr[L3] = addr;\r
- req->radix[L2] = node;\r
- \r
- if (addr == ZERO) {\r
+ if (addr == ZERO) {
/* nothing below L2, create an empty L3 and alloc data. */
/* (So skip READ_L3_L1f.) */
req->radix[L3] = newblock();
req->state = ALLOC_DATA_L1f;
block_alloc( req->block, write_cb, req );
- } else {\r
+ } else {
req->state = READ_L3_L1f;
block_read( addr, write_cb, req );
- }\r
- break;\r
- \r
- case READ_L3_L1f:\r
- \r
- DPRINTF("READ_L3_L1f\n");\r
- node = (radix_tree_node) IO_BLOCK(r);\r
+ }
+ break;
+
+ case READ_L3_L1f:
+
+ DPRINTF("READ_L3_L1f\n");
+ node = (radix_tree_node) IO_BLOCK(r);
clear_L3_w_bits(node);
- if (node == NULL) goto fail;\r
- a = node[L2_IDX(req->vaddr)];\r
- addr = getid(a);\r
-\r
- req->radix[L3] = node;\r
+ if (node == NULL) goto fail;
+ a = node[L2_IDX(req->vaddr)];
+ addr = getid(a);
+
+ req->radix[L3] = node;
req->state = ALLOC_DATA_L1f;
- block_alloc( req->block, write_cb, req );\r
- break;\r
- \r
- case ALLOC_DATA_L1f:\r
-\r
- DPRINTF("ALLOC_DATA_L1f\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L3][L3_IDX(req->vaddr)] = a;\r
+ block_alloc( req->block, write_cb, req );
+ break;
+
+ case ALLOC_DATA_L1f:
+
+ DPRINTF("ALLOC_DATA_L1f\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L3][L3_IDX(req->vaddr)] = a;
bi = (struct block_info *) &req->radix[L3][L3_IDX(req->vaddr)+1];
req->bi.unused = 107;
*bi = req->bi;
- req->state = ALLOC_L3_L1f;\r
- block_alloc( (char*)req->radix[L3], write_cb, req );\r
- break;\r
-\r
- case ALLOC_L3_L1f:\r
-\r
- DPRINTF("ALLOC_L3_L1f\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L2][L2_IDX(req->vaddr)] = a;\r
- req->state = ALLOC_L2_L1f;\r
- block_alloc( (char*)req->radix[L2], write_cb, req );\r
- break;\r
-\r
- case ALLOC_L2_L1f:\r
-\r
- DPRINTF("ALLOC_L2_L1f\n");\r
- addr = IO_ADDR(r);\r
- a = writable(addr);\r
- req->radix[L1][L1_IDX(req->vaddr)] = a;\r
- req->state = WRITE_L1_L1f;\r
- block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req);\r
- break;\r
-\r
+ req->state = ALLOC_L3_L1f;
+ block_alloc( (char*)req->radix[L3], write_cb, req );
+ break;
+
+ case ALLOC_L3_L1f:
+
+ DPRINTF("ALLOC_L3_L1f\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L2][L2_IDX(req->vaddr)] = a;
+ req->state = ALLOC_L2_L1f;
+ block_alloc( (char*)req->radix[L2], write_cb, req );
+ break;
+
+ case ALLOC_L2_L1f:
+
+ DPRINTF("ALLOC_L2_L1f\n");
+ addr = IO_ADDR(r);
+ a = writable(addr);
+ req->radix[L1][L1_IDX(req->vaddr)] = a;
+ req->state = WRITE_L1_L1f;
+ block_write(req->radix_addr[L1], (char*)req->radix[L1], write_cb, req);
+ break;
+
case WRITE_L3:
- case WRITE_L3_L3z:\r
- case WRITE_L3_L3f:\r
- case WRITE_L2_L2z:\r
- case WRITE_L2_L2f:\r
- case WRITE_L1_L1z:\r
- case WRITE_L1_L1f:\r
- {\r
- int i;\r
- DPRINTF("DONE\n");\r
- /* free any saved node vals. */\r
- for (i=0; i<3; i++)\r
+ case WRITE_L3_L3z:
+ case WRITE_L3_L3f:
+ case WRITE_L2_L2z:
+ case WRITE_L2_L2f:
+ case WRITE_L1_L1z:
+ case WRITE_L1_L1f:
+ {
+ int i;
+ DPRINTF("DONE\n");
+ /* free any saved node vals. */
+ for (i=0; i<3; i++)
if (req->radix[i] != 0) free(req->radix[i]);
- req->retval = r;\r
- req->state = WRITE_UNLOCKED;\r
- block_wunlock(req->lock, L1_IDX(req->vaddr), write_cb, req);\r
- break;\r
- }\r
- case WRITE_UNLOCKED:\r
- {\r
+ req->retval = r;
+ req->state = WRITE_UNLOCKED;
+ block_wunlock(req->lock, L1_IDX(req->vaddr), write_cb, req);
+ break;
+ }
+ case WRITE_UNLOCKED:
+ {
struct io_ret r;
io_cb_t cb;
- DPRINTF("WRITE_UNLOCKED!\n");\r
- req_param = req->param;\r
- r = req->retval;\r
- cb = req->cb;\r
+ DPRINTF("WRITE_UNLOCKED!\n");
+ req_param = req->param;
+ r = req->retval;
+ cb = req->cb;
free(req);
- cb(r, req_param);\r
- break;\r
- }\r
- \r
- default:\r
- DPRINTF("*** Write: Bad state! (%d) ***\n", req->state);\r
- goto fail;\r
- }\r
- \r
- return;\r
- \r
- fail:\r
+ cb(r, req_param);
+ break;
+ }
+
+ default:
+ DPRINTF("*** Write: Bad state! (%d) ***\n", req->state);
+ goto fail;
+ }
+
+ return;
+
+ fail:
{
struct io_ret r;
io_cb_t cb;
int i;
DPRINTF("asyn_write had a read error mid-way.\n");
- req_param = req->param;\r
- cb = req->cb;\r
- r.type = IO_INT_T;\r
- r.u.i = -1;\r
+ req_param = req->param;
+ cb = req->cb;
+ r.type = IO_INT_T;
+ r.u.i = -1;
/* free any saved node vals. */
for (i=0; i<3; i++)
if (req->radix[i] != 0) free(req->radix[i]);
- free(req);\r
- cb(r, req_param);\r
+ free(req);
+ cb(r, req_param);
}
-}\r
-\r
+}
+
char *vdi_read_s(vdi_t *vdi, u64 vaddr)
{
pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;